Prelims

Load packages:

library(tidyverse)
library(brms)

R and package versions for reporting and reproducibility:

# Record the full R version details for the reproducibility report:
R.Version()
## $platform
## [1] "x86_64-apple-darwin17.0"
## 
## $arch
## [1] "x86_64"
## 
## $os
## [1] "darwin17.0"
## 
## $system
## [1] "x86_64, darwin17.0"
## 
## $status
## [1] ""
## 
## $major
## [1] "4"
## 
## $minor
## [1] "0.2"
## 
## $year
## [1] "2020"
## 
## $month
## [1] "06"
## 
## $day
## [1] "22"
## 
## $`svn rev`
## [1] "78730"
## 
## $language
## [1] "R"
## 
## $version.string
## [1] "R version 4.0.2 (2020-06-22)"
## 
## $nickname
## [1] "Taking Off Again"
# Record the package versions the analyses were run with:
packageVersion('tidyverse')
## [1] '1.3.0'
packageVersion('brms')
## [1] '2.13.3'

Load data:

# Load the two annotation files: gesture frequency/size, and
# path/manner/ground/viewpoint (one row per participant per condition):
freq <- read_csv('../data/frequency_size.csv')
path <- read_csv('../data/path_manner_ground_viewpoint.csv')

Show:

# Preview the frequency/size tibble:
freq
## # A tibble: 54 x 10
##    Language Condition Participant Gender Total_Freq Vertical_Big Lateral_Big
##    <chr>    <chr>           <dbl> <chr>       <dbl>        <dbl>       <dbl>
##  1 Korean   Friend              1 F              91           75          30
##  2 Korean   Friend              2 M              62           20           5
##  3 Korean   Friend              3 F              89           67          28
##  4 Korean   Friend              4 F              30           28          13
##  5 Korean   Friend              5 M              33           24           3
##  6 Korean   Friend              6 F              26           25           1
##  7 Korean   Friend              7 F              63           52          32
##  8 Korean   Friend              8 M              51           44          14
##  9 Korean   Friend              9 M              67           66          15
## 10 Korean   Friend             10 M              61           51          33
## # … with 44 more rows, and 3 more variables: Sagittal_Big <dbl>,
## #   Both_Hands <dbl>, Shape_Open <dbl>
# Preview the path/manner tibble:
path
## # A tibble: 54 x 11
##    Language Condition    ID Gender TotalEvents  Path Manner Ground Character
##    <chr>    <chr>     <dbl> <chr>        <dbl> <dbl>  <dbl>  <dbl>     <dbl>
##  1 Korean   Friend        1 F               16    13     12      3         8
##  2 Korean   Friend        2 M               14     8     10      0         8
##  3 Korean   Friend        3 F               15    12     12      4         5
##  4 Korean   Friend        4 F                7     6      6      2         4
##  5 Korean   Friend        5 M                8     7      5      0         3
##  6 Korean   Friend        6 F               13    11      9      3         6
##  7 Korean   Friend        7 F               10     7      7      4         5
##  8 Korean   Friend        8 M               13    10      9      3         4
##  9 Korean   Friend        9 M               15    12     10      5         8
## 10 Korean   Friend       10 M               13    10      7      2         5
## # … with 44 more rows, and 2 more variables: Observer <dbl>, Dual <dbl>

We can just bind the columns of the two into a new tibble called ‘dyads’ (this assumes both files list the participants in the same row order):

# Column-bind the event columns from 'path' onto 'freq'.
# NOTE(review): bind_cols() silently assumes both tibbles have identical
# row order (same participant and condition per row); a keyed join on
# Language/Condition/ID would be safer — verify against the raw files.
dyads <- bind_cols(freq, select(path, TotalEvents:Dual))

Create a unique identifier by merging language with ID:

# Create a unique participant identifier (e.g. 'Korean_1') and reorder
# columns. The explicit select() below already excludes Participant
# (Total_Freq:Dual does not span it), so the original intermediate
# select(-Participant) step was redundant and has been removed:
dyads <- mutate(dyads,
                ID = str_c(Language, '_', Participant)) %>% 
  select(ID, Language, Condition, Gender, Total_Freq:Dual)

Sort by ID so that the Friend and Superior rows for each dyad are next to each other:

# Order rows by dyad and then condition, pairing each dyad's two rows:
dyads <- dyads %>% 
  arrange(ID, Condition)

Check one more time:

# Inspect the merged tibble:
dyads
## # A tibble: 54 x 17
##    ID    Language Condition Gender Total_Freq Vertical_Big Lateral_Big
##    <chr> <chr>    <chr>     <chr>       <dbl>        <dbl>       <dbl>
##  1 Cata… Catalan  Friend    M              61           39          19
##  2 Cata… Catalan  Superior  M              78           60          13
##  3 Cata… Catalan  Friend    F              61           46          12
##  4 Cata… Catalan  Superior  F              73           42           7
##  5 Cata… Catalan  Friend    M              58           55          24
##  6 Cata… Catalan  Superior  M              51           43          14
##  7 Cata… Catalan  Friend    M             101           73          33
##  8 Cata… Catalan  Superior  M              82           52          31
##  9 Cata… Catalan  Friend    M              56           49          20
## 10 Cata… Catalan  Superior  M              74           62          19
## # … with 44 more rows, and 10 more variables: Sagittal_Big <dbl>,
## #   Both_Hands <dbl>, Shape_Open <dbl>, TotalEvents <dbl>, Path <dbl>,
## #   Manner <dbl>, Ground <dbl>, Character <dbl>, Observer <dbl>, Dual <dbl>

Settings for Bayesian analysis (same across all)

Settings for parallel processing:

# Run MCMC chains in parallel, one per available core:
options(mc.cores=parallel::detectCores())

Weakly informative priors:

# Weakly informative normal(0, 2) prior on all population-level slopes:
my_priors <- c(prior('normal(0, 2)', class = 'b'))

Control parameters for MCMC sampling:

# Stricter sampler settings to avoid divergent transitions.
# (Assignment changed from `=` to `<-`, consistent with the rest of the
# script and the tidyverse style guide.)
my_controls <- list(adapt_delta = 0.999,
                    max_treedepth = 13)

Iterations for all chains:

# 6000 iterations per chain, of which 4000 are warmup
# (leaving 2000 post-warmup draws per chain, 8000 total over 4 chains):
my_iter <- 6000
my_warmup <- 4000

Analysis: Gesture frequency

Overall number of gestures per condition:

# Total gesture counts per condition, with proportions and percentages:
dyads %>% group_by(Condition) %>% 
  summarize(Freq = sum(Total_Freq)) %>% 
  mutate(Prop = Freq / sum(Freq),
         Prop = round(Prop, 2),
         Percentage = str_c(Prop * 100, '%'))
## `summarise()` ungrouping output (override with `.groups` argument)
## # A tibble: 2 x 4
##   Condition  Freq  Prop Percentage
##   <chr>     <dbl> <dbl> <chr>     
## 1 Friend     1453  0.54 54%       
## 2 Superior   1217  0.46 46%

Overall number of gestures per condition per language:

# Tally gesture counts by language and condition:

freq_count <- dyads %>% group_by(Language, Condition) %>% 
  summarize(Freq = sum(Total_Freq))
## `summarise()` regrouping output by 'Language' (override with `.groups` argument)
# Take sums by language for calculating proportions, and calculate them:

# Per-language totals, then the proportion of each condition within its
# language. The join key is spelled out (by = 'Language') rather than
# relying on dplyr's implicit common-column matching:
freq_count <- freq_count %>% group_by(Language) %>% 
  summarize(Total = sum(Freq)) %>%
  right_join(freq_count, by = 'Language') %>% 
  mutate(Prop = Freq / Total,
         Prop = round(Prop, 2),
         Percentage = str_c(Prop * 100, '%'))
## `summarise()` ungrouping output (override with `.groups` argument)
## Joining, by = "Language"
# Check the per-language condition breakdown:

freq_count
## # A tibble: 4 x 6
##   Language Total Condition  Freq  Prop Percentage
##   <chr>    <dbl> <chr>     <dbl> <dbl> <chr>     
## 1 Catalan   1498 Friend      764  0.51 51%       
## 2 Catalan   1498 Superior    734  0.49 49%       
## 3 Korean    1172 Friend      689  0.59 59%       
## 4 Korean    1172 Superior    483  0.41 41%

Check total frequency by participant:

# Wide format: one row per dyad with the Friend and Superior counts side
# by side, plus the condition difference (Superior minus Friend):
freqs <- dyads %>% select(Language, ID, Condition, Total_Freq) %>% 
  pivot_wider(names_from = Condition, values_from = Total_Freq) %>% 
  mutate(PoliteDiff = Superior - Friend)

# Check all 27 dyads (one row each, now that conditions are columns):

freqs %>% print(n = Inf)
## # A tibble: 27 x 5
##    Language ID         Friend Superior PoliteDiff
##    <chr>    <chr>       <dbl>    <dbl>      <dbl>
##  1 Catalan  Catalan_1      61       78         17
##  2 Catalan  Catalan_11     61       73         12
##  3 Catalan  Catalan_12     58       51         -7
##  4 Catalan  Catalan_13    101       82        -19
##  5 Catalan  Catalan_14     56       74         18
##  6 Catalan  Catalan_16     40       43          3
##  7 Catalan  Catalan_2      31       40          9
##  8 Catalan  Catalan_3      81       49        -32
##  9 Catalan  Catalan_4      32       49         17
## 10 Catalan  Catalan_5      39       32         -7
## 11 Catalan  Catalan_6      35       30         -5
## 12 Catalan  Catalan_7      88       73        -15
## 13 Catalan  Catalan_8      59       47        -12
## 14 Catalan  Catalan_9      22       13         -9
## 15 Korean   Korean_1       91       76        -15
## 16 Korean   Korean_10      61       61          0
## 17 Korean   Korean_11      31       16        -15
## 18 Korean   Korean_12      63       42        -21
## 19 Korean   Korean_13      22       10        -12
## 20 Korean   Korean_2       62       37        -25
## 21 Korean   Korean_3       89       49        -40
## 22 Korean   Korean_4       30       30          0
## 23 Korean   Korean_5       33       11        -22
## 24 Korean   Korean_6       26       13        -13
## 25 Korean   Korean_7       63       62         -1
## 26 Korean   Korean_8       51       27        -24
## 27 Korean   Korean_9       67       49        -18

Very consistent picture where Koreans consistently produce fewer gestures in the superior condition. Compute for how many participants this holds:

# Classify each dyad by the direction of the condition difference.
# case_when() replaces the original nested ifelse() chain, which
# compared a character vector against the number 0 and only produced the
# right answer through accidental string/number coercion:
freqs <- freqs %>%
  mutate(Category = case_when(PoliteDiff > 0 ~ 'more polite',
                              PoliteDiff < 0 ~ 'less polite',
                              TRUE ~ 'same'))

# Count the direction categories per language:

freqs %>% count(Language, Category)
## # A tibble: 4 x 3
##   Language Category        n
##   <chr>    <chr>       <int>
## 1 Catalan  less polite     8
## 2 Catalan  more polite     6
## 3 Korean   less polite    11
## 4 Korean   same            2

For a count model (negative binomial, fitted below), we’ll work with the long format, but we’ll have to make language and condition into sum-coded factors to interpret the interaction:

# Make language and condition into factors so contrasts can be set:

dyads <- mutate(dyads,
                Language_c = factor(Language),
                Condition_c = factor(Condition))

# Deviation code with Catalan and Friend as the -0.5 level: contr.sum(2)
# gives +1/-1, the multiplication by -1 flips it so the alphabetically
# first level (Catalan, Friend) gets the negative code, and dividing by
# 2 yields -0.5/+0.5 (verified against the contrasts printout below):

contrasts(dyads$Language_c) <- (contr.sum(2) * -1) / 2
contrasts(dyads$Condition_c) <- (contr.sum(2) * -1) / 2

Fit Bayesian model:

# Negative binomial model of total gesture counts with by-dyad random
# intercepts and random condition slopes; negbinomial allows
# overdispersion relative to a plain Poisson. init = 0 starts chains at
# zero, and seed = 42 fixes the RNG for reproducibility:
freq_mdl <- brm(Total_Freq ~ Language_c * Condition_c + (1 + Condition_c|ID),
      data = dyads, family = 'negbinomial',
      prior = my_priors,
      control = my_controls,
      seed = 42,
      init = 0, chains = 4, iter = my_iter, warmup = my_warmup)
## Running /Library/Frameworks/R.framework/Resources/bin/R CMD SHLIB foo.c
## clang -mmacosx-version-min=10.13 -I"/Library/Frameworks/R.framework/Resources/include" -DNDEBUG   -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/Rcpp/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/unsupported"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/BH/include" -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/src/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppParallel/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/rstan/include" -DEIGEN_NO_DEBUG  -DBOOST_DISABLE_ASSERTS  -DBOOST_PENDING_INTEGER_LOG2_HPP  -DSTAN_THREADS  -DBOOST_NO_AUTO_PTR  -include '/Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp'  -D_REENTRANT -DRCPP_PARALLEL_USE_TBB=1   -I/usr/local/include   -fPIC  -Wall -g -O2  -c foo.c -o foo.o
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Dense:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Core:88:
## /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:613:1: error: unknown type name 'namespace'
## namespace Eigen {
## ^
## /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:613:16: error: expected ';' after top level declarator
## namespace Eigen {
##                ^
##                ;
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Dense:1:
## /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Core:96:10: fatal error: 'complex' file not found
## #include <complex>
##          ^~~~~~~~~
## 3 errors generated.
## make: *** [foo.o] Error 1

Model summary:

# Posterior summary of the frequency model:
summary(freq_mdl)
##  Family: negbinomial 
##   Links: mu = log; shape = identity 
## Formula: Total_Freq ~ Language_c * Condition_c + (1 + Condition_c | ID) 
##    Data: dyads (Number of observations: 54) 
## Samples: 4 chains, each with iter = 6000; warmup = 4000; thin = 1;
##          total post-warmup samples = 8000
## 
## Group-Level Effects: 
## ~ID (Number of levels: 27) 
##                             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept)                   0.50      0.08     0.37     0.69 1.00     1618
## sd(Condition_c1)                0.14      0.09     0.01     0.33 1.00     2078
## cor(Intercept,Condition_c1)     0.44      0.43    -0.67     0.97 1.00     4953
##                             Tail_ESS
## sd(Intercept)                   3496
## sd(Condition_c1)                2456
## cor(Intercept,Condition_c1)     3533
## 
## Population-Level Effects: 
##                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept                    3.79      0.10     3.59     4.00 1.00     1195
## Language_c1                 -0.23      0.20    -0.63     0.16 1.00     1191
## Condition_c1                -0.23      0.07    -0.37    -0.10 1.00     5209
## Language_c1:Condition_c1    -0.36      0.13    -0.62    -0.11 1.00     6009
##                          Tail_ESS
## Intercept                    1953
## Language_c1                  2125
## Condition_c1                 5540
## Language_c1:Condition_c1     5562
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## shape    77.90     60.84    19.48   244.82 1.00     2740     4500
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).

What does the interaction mean? For this the sum-codes need to be checked:

# Verify the coding: Catalan/Friend = -0.5, Korean/Superior = +0.5:
contrasts(dyads$Language_c)
##         [,1]
## Catalan -0.5
## Korean   0.5
contrasts(dyads$Condition_c)
##          [,1]
## Friend   -0.5
## Superior  0.5

Get the posterior probability of the interaction and language effects:

# Extract posterior draws as a data frame (posterior_samples() is the
# brms 2.x API; later brms versions prefer as_draws_df — TODO when upgrading):
posts <- posterior_samples(freq_mdl)

Check whether the condition effect is above zero:

# Posterior probability that the condition effect is positive; the mean
# of a logical vector is the proportion of draws above zero (equivalent
# to the original sum(...)/nrow(posts), but idiomatic):
mean(posts$b_Condition_c1 > 0)
## [1] 0.00075

Check whether the interaction is above zero:

# Posterior probability that the interaction is positive (mean of a
# logical = proportion of draws, replacing sum(...)/nrow(posts)):
mean(posts$`b_Language_c1:Condition_c1` > 0)
## [1] 0.003125

Posterior predictive checks to see whether our model could’ve predicted the data.

# Posterior predictive check against 100 posterior draws:
pp_check(freq_mdl, nsamples = 100)

Looks good.

Analysis: Gesture size

Check frequency of vertical and lateral large gestures… which needs to be divided by the total frequency (since the large gestures are a subset of all gestures). If we don’t calculate proportions, then the result would just be rehashing the total frequency result, as we know that overall gesture rates are lower for superior.

# Proportion of big vertical gestures out of all gestures, per
# condition, plus the Superior-minus-Friend difference:
vertical <- dyads %>% select(Language, ID, Condition, Vertical_Big, Total_Freq) %>% 
  pivot_wider(names_from = Condition, values_from = c(Vertical_Big, Total_Freq)) %>%
  mutate(Vertical_Big_Superior = Vertical_Big_Superior / Total_Freq_Superior,
         Vertical_Big_Friend = Vertical_Big_Friend / Total_Freq_Friend,
         PoliteDiff = Vertical_Big_Superior - Vertical_Big_Friend)
# Same computation for big lateral gestures:
lateral <- dyads %>% select(Language, ID, Condition, Lateral_Big, Total_Freq) %>% 
  pivot_wider(names_from = Condition, values_from = c(Lateral_Big, Total_Freq)) %>%
  mutate(Lateral_Big_Superior = Lateral_Big_Superior / Total_Freq_Superior,
         Lateral_Big_Friend = Lateral_Big_Friend / Total_Freq_Friend,
         PoliteDiff = Lateral_Big_Superior - Lateral_Big_Friend)

# Check vertical proportions for all 27 dyads:

vertical %>% print(n = Inf)
## # A tibble: 27 x 7
##    Language ID    Vertical_Big_Fr… Vertical_Big_Su… Total_Freq_Frie…
##    <chr>    <chr>            <dbl>            <dbl>            <dbl>
##  1 Catalan  Cata…            0.639           0.769                61
##  2 Catalan  Cata…            0.754           0.575                61
##  3 Catalan  Cata…            0.948           0.843                58
##  4 Catalan  Cata…            0.723           0.634               101
##  5 Catalan  Cata…            0.875           0.838                56
##  6 Catalan  Cata…            0.6             0.837                40
##  7 Catalan  Cata…            0.806           0.775                31
##  8 Catalan  Cata…            0.901           0.918                81
##  9 Catalan  Cata…            0.312           0.143                32
## 10 Catalan  Cata…            0.821           0.906                39
## 11 Catalan  Cata…            0.743           0.667                35
## 12 Catalan  Cata…            0.795           0.699                88
## 13 Catalan  Cata…            0.627           0.617                59
## 14 Catalan  Cata…            0.364           0.385                22
## 15 Korean   Kore…            0.824           0.934                91
## 16 Korean   Kore…            0.836           0.967                61
## 17 Korean   Kore…            0.871           0.875                31
## 18 Korean   Kore…            0.841           0.810                63
## 19 Korean   Kore…            0.636           0.4                  22
## 20 Korean   Kore…            0.323           0.0270               62
## 21 Korean   Kore…            0.753           0.939                89
## 22 Korean   Kore…            0.933           0.9                  30
## 23 Korean   Kore…            0.727           0.545                33
## 24 Korean   Kore…            0.962           0.692                26
## 25 Korean   Kore…            0.825           0.919                63
## 26 Korean   Kore…            0.863           0.407                51
## 27 Korean   Kore…            0.985           0.673                67
## # … with 2 more variables: Total_Freq_Superior <dbl>, PoliteDiff <dbl>
# Check lateral proportions for all 27 dyads:
lateral %>% print(n = Inf)
## # A tibble: 27 x 7
##    Language ID    Lateral_Big_Fri… Lateral_Big_Sup… Total_Freq_Frie…
##    <chr>    <chr>            <dbl>            <dbl>            <dbl>
##  1 Catalan  Cata…           0.311            0.167                61
##  2 Catalan  Cata…           0.197            0.0959               61
##  3 Catalan  Cata…           0.414            0.275                58
##  4 Catalan  Cata…           0.327            0.378               101
##  5 Catalan  Cata…           0.357            0.257                56
##  6 Catalan  Cata…           0.475            0.442                40
##  7 Catalan  Cata…           0.419            0.4                  31
##  8 Catalan  Cata…           0.346            0.388                81
##  9 Catalan  Cata…           0.0938           0.0408               32
## 10 Catalan  Cata…           0.410            0.281                39
## 11 Catalan  Cata…           0.2              0.333                35
## 12 Catalan  Cata…           0.534            0.411                88
## 13 Catalan  Cata…           0.237            0.128                59
## 14 Catalan  Cata…           0.0909           0.0769               22
## 15 Korean   Kore…           0.330            0.211                91
## 16 Korean   Kore…           0.541            0.393                61
## 17 Korean   Kore…           0.0645           0                    31
## 18 Korean   Kore…           0.238            0.0714               63
## 19 Korean   Kore…           0.545            0                    22
## 20 Korean   Kore…           0.0806           0                    62
## 21 Korean   Kore…           0.315            0.306                89
## 22 Korean   Kore…           0.433            0.367                30
## 23 Korean   Kore…           0.0909           0                    33
## 24 Korean   Kore…           0.0385           0                    26
## 25 Korean   Kore…           0.508            0.548                63
## 26 Korean   Kore…           0.275            0.0370               51
## 27 Korean   Kore…           0.224            0.204                67
## # … with 2 more variables: Total_Freq_Superior <dbl>, PoliteDiff <dbl>

Here the picture is more mixed: both languages show increases as well as decreases in the proportion of large gestures. Compute for how many participants the proportion goes each way:

# Classify the direction of the proportion change per dyad. case_when()
# replaces the original ifelse() chains, which compared a character
# vector against the number 0 and relied on accidental string/number
# coercion for the right result:
vertical <- vertical %>%
  mutate(Category = case_when(PoliteDiff > 0 ~ 'more polite',
                              PoliteDiff < 0 ~ 'less polite',
                              TRUE ~ 'same'))
lateral <- lateral %>%
  mutate(Category = case_when(PoliteDiff > 0 ~ 'more polite',
                              PoliteDiff < 0 ~ 'less polite',
                              TRUE ~ 'same'))

# Count the direction categories per language, for each gesture plane:

vertical %>% count(Language, Category)
## # A tibble: 4 x 3
##   Language Category        n
##   <chr>    <chr>       <int>
## 1 Catalan  less polite     9
## 2 Catalan  more polite     5
## 3 Korean   less polite     8
## 4 Korean   more polite     5
lateral %>% count(Language, Category)
## # A tibble: 4 x 3
##   Language Category        n
##   <chr>    <chr>       <int>
## 1 Catalan  less polite    11
## 2 Catalan  more polite     3
## 3 Korean   less polite    12
## 4 Korean   more polite     1

Average all the big gestures together:

# Sum large gestures across the three planes; note a single gesture can
# be large in more than one plane, so this sum can exceed Total_Freq:
dyads <- mutate(dyads,
                BigGesture = Vertical_Big + Lateral_Big + Sagittal_Big)

Create a model for big gestures — we can’t, because the denominator isn’t right: a gesture can be large in more than one plane, so BigGesture can exceed Total_Freq and trials(Total_Freq) would be invalid:

# big_mdl <- brm(BigGesture | trials(Total_Freq) ~ Language_c * Condition_c +
#                  (1 + Condition_c|ID),
#                data = dyads, family = binomial,
#                prior = my_priors, control = my_controls,
#                seed = 42,
#                init = 0, chains = 4, iter = my_iter, warmup = my_warmup)

Summarize model:

# summary(big_mdl)

Get the posterior probabilities of the big_mdl:

# posts <- posterior_samples(big_mdl)

Posterior probability of the condition effect being above 0:

# sum(posts$b_Condition_c1 > 0) / nrow(posts)

Posterior probability of the interaction effect being above 0:

# sum(posts$`b_Language_c1:Condition_c1` > 0) / nrow(posts)

Posterior predictive checks to see whether our model could’ve predicted the data:

# pp_check(big_mdl, nsamples = 100)

Analysis: Use of two-handed gestures

Overall number of gestures per condition:

# Total two-handed gesture counts per condition:
dyads %>% group_by(Condition) %>% 
  summarize(Freq = sum(Both_Hands)) %>% 
  mutate(Prop = Freq / sum(Freq),
         Prop = round(Prop, 2),
         Percentage = str_c(Prop * 100, '%'))
## `summarise()` ungrouping output (override with `.groups` argument)
## # A tibble: 2 x 4
##   Condition  Freq  Prop Percentage
##   <chr>     <dbl> <dbl> <chr>     
## 1 Friend      659  0.53 53%       
## 2 Superior    578  0.47 47%

Overall number of gestures per condition per language:

# Tally two-handed gestures by language and condition:

freq_count <- dyads %>% group_by(Language, Condition) %>% 
  summarize(Freq = sum(Both_Hands))
## `summarise()` regrouping output by 'Language' (override with `.groups` argument)
# Take sums by language for calculating proportions, and calculate them:

# Per-language totals, then within-language condition proportions; the
# join key is made explicit rather than implicit:
freq_count <- freq_count %>% group_by(Language) %>% 
  summarize(Total = sum(Freq)) %>%
  right_join(freq_count, by = 'Language') %>% 
  mutate(Prop = Freq / Total,
         Prop = round(Prop, 2),
         Percentage = str_c(Prop * 100, '%'))
## `summarise()` ungrouping output (override with `.groups` argument)
## Joining, by = "Language"
# Check the two-handed per-language breakdown:

freq_count
## # A tibble: 4 x 6
##   Language Total Condition  Freq  Prop Percentage
##   <chr>    <dbl> <chr>     <dbl> <dbl> <chr>     
## 1 Catalan    677 Friend      364  0.54 54%       
## 2 Catalan    677 Superior    313  0.46 46%       
## 3 Korean     560 Friend      295  0.53 53%       
## 4 Korean     560 Superior    265  0.47 47%

Check frequency of two-handed gestures:

# Proportion of two-handed gestures out of all gestures, per condition,
# plus the Superior-minus-Friend difference:
both <- dyads %>% select(Language, ID, Condition, Both_Hands, Total_Freq) %>% 
  pivot_wider(names_from = Condition, values_from = c(Both_Hands, Total_Freq)) %>% 
  mutate(Both_Hands_Superior = Both_Hands_Superior / Total_Freq_Superior, 
         Both_Hands_Friend = Both_Hands_Friend / Total_Freq_Friend,
         PoliteDiff = Both_Hands_Superior - Both_Hands_Friend)

# Check proportions for all 27 dyads:

both %>% print(n = Inf)
## # A tibble: 27 x 7
##    Language ID    Both_Hands_Frie… Both_Hands_Supe… Total_Freq_Frie…
##    <chr>    <chr>            <dbl>            <dbl>            <dbl>
##  1 Catalan  Cata…           0.590            0.628                61
##  2 Catalan  Cata…           0.541            0.767                61
##  3 Catalan  Cata…           0.741            0.706                58
##  4 Catalan  Cata…           0.653            0.415               101
##  5 Catalan  Cata…           0.25             0.230                56
##  6 Catalan  Cata…           0.3              0.0465               40
##  7 Catalan  Cata…           0.516            0.425                31
##  8 Catalan  Cata…           0.346            0.143                81
##  9 Catalan  Cata…           0.531            0.286                32
## 10 Catalan  Cata…           0.410            0.0938               39
## 11 Catalan  Cata…           0.229            0.1                  35
## 12 Catalan  Cata…           0.489            0.589                88
## 13 Catalan  Cata…           0.542            0.681                59
## 14 Catalan  Cata…           0                0                    22
## 15 Korean   Kore…           0.473            0.487                91
## 16 Korean   Kore…           0.426            0.508                61
## 17 Korean   Kore…           0.129            0                    31
## 18 Korean   Kore…           0.651            0.786                63
## 19 Korean   Kore…           0.591            0.8                  22
## 20 Korean   Kore…           0.5              0.595                62
## 21 Korean   Kore…           0.483            0.653                89
## 22 Korean   Kore…           0.467            0.767                30
## 23 Korean   Kore…           0.0303           0                    33
## 24 Korean   Kore…           0.462            0.462                26
## 25 Korean   Kore…           0.524            0.435                63
## 26 Korean   Kore…           0.373            0.556                51
## 27 Korean   Kore…           0.224            0.633                67
## # … with 2 more variables: Total_Freq_Superior <dbl>, PoliteDiff <dbl>

The direction differs by language: Catalan speakers tend to use proportionally fewer two-handed gestures in the superior condition, whereas Korean speakers tend to use more. Compute for how many this is:

# Classify the direction of the two-handed proportion change per dyad.
# case_when() replaces the ifelse() chain that compared a character
# vector against 0 and only worked via accidental coercion:
both <- both %>%
  mutate(Category = case_when(PoliteDiff > 0 ~ 'more polite',
                              PoliteDiff < 0 ~ 'less polite',
                              TRUE ~ 'same'))

# Count the direction categories per language:

both %>% count(Language, Category)
## # A tibble: 6 x 3
##   Language Category        n
##   <chr>    <chr>       <int>
## 1 Catalan  less polite     9
## 2 Catalan  more polite     4
## 3 Catalan  same            1
## 4 Korean   less polite     3
## 5 Korean   more polite     9
## 6 Korean   same            1

Create a model for two-handed gestures:

# Binomial model: two-handed gestures out of all gestures per dyad per
# condition, with by-dyad random intercepts and condition slopes; here
# trials(Total_Freq) is valid because Both_Hands <= Total_Freq:
both_mdl <- brm(Both_Hands | trials(Total_Freq) ~ Language_c * Condition_c +
                  (1 + Condition_c|ID),
                data = dyads, family = binomial,
                prior = my_priors, control = my_controls,
                seed = 42,
                init = 0, chains = 4, iter = my_iter, warmup = my_warmup)
## Running /Library/Frameworks/R.framework/Resources/bin/R CMD SHLIB foo.c
## clang -mmacosx-version-min=10.13 -I"/Library/Frameworks/R.framework/Resources/include" -DNDEBUG   -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/Rcpp/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/unsupported"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/BH/include" -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/src/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppParallel/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/rstan/include" -DEIGEN_NO_DEBUG  -DBOOST_DISABLE_ASSERTS  -DBOOST_PENDING_INTEGER_LOG2_HPP  -DSTAN_THREADS  -DBOOST_NO_AUTO_PTR  -include '/Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp'  -D_REENTRANT -DRCPP_PARALLEL_USE_TBB=1   -I/usr/local/include   -fPIC  -Wall -g -O2  -c foo.c -o foo.o
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Dense:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Core:88:
## /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:613:1: error: unknown type name 'namespace'
## namespace Eigen {
## ^
## /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:613:16: error: expected ';' after top level declarator
## namespace Eigen {
##                ^
##                ;
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Dense:1:
## /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Core:96:10: fatal error: 'complex' file not found
## #include <complex>
##          ^~~~~~~~~
## 3 errors generated.
## make: *** [foo.o] Error 1

Summarize model:

# Posterior summary of the two-handed model:
summary(both_mdl)
##  Family: binomial 
##   Links: mu = logit 
## Formula: Both_Hands | trials(Total_Freq) ~ Language_c * Condition_c + (1 + Condition_c | ID) 
##    Data: dyads (Number of observations: 54) 
## Samples: 4 chains, each with iter = 6000; warmup = 4000; thin = 1;
##          total post-warmup samples = 8000
## 
## Group-Level Effects: 
## ~ID (Number of levels: 27) 
##                             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept)                   1.26      0.23     0.88     1.78 1.00     1732
## sd(Condition_c1)                0.85      0.21     0.50     1.33 1.00     3150
## cor(Intercept,Condition_c1)     0.71      0.18     0.24     0.94 1.00     3152
##                             Tail_ESS
## sd(Intercept)                   3495
## sd(Condition_c1)                4683
## cor(Intercept,Condition_c1)     4785
## 
## Population-Level Effects: 
##                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept                   -0.43      0.25    -0.94     0.06 1.00     1523
## Language_c1                  0.25      0.48    -0.70     1.16 1.00     1439
## Condition_c1                -0.08      0.21    -0.51     0.30 1.00     2460
## Language_c1:Condition_c1     0.86      0.38     0.11     1.62 1.00     2554
##                          Tail_ESS
## Intercept                    2554
## Language_c1                  2762
## Condition_c1                 3385
## Language_c1:Condition_c1     4001
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).

Get the posterior samples of both_mdl:

# One row per post-warmup draw from the two-handed gesture model.
# NOTE(review): posterior_samples() is deprecated in later brms versions in
# favour of as_draws_df(); fine for the brms 2.13.3 used here.
posts <- posterior_samples(both_mdl)

Posterior probability of the condition effect being above 0:

# Proportion of posterior draws in which the condition effect is positive.
mean(posts$b_Condition_c1 > 0)
## [1] 0.356875

Posterior probability of the interaction effect being above 0:

# Proportion of posterior draws in which the interaction effect is positive.
mean(posts$`b_Language_c1:Condition_c1` > 0)
## [1] 0.98625

Posterior predictive checks to see whether our model could’ve predicted the data:

pp_check(both_mdl, nsamples = 100)

Analysis: Use of open-handed gestures

Overall number of gestures per condition:

# Total open-handed gestures per condition, with overall proportion and a
# formatted percentage label (Prop is rounded to 2 d.p. before formatting,
# so Percentage matches the rounded value).
dyads %>% group_by(Condition) %>% 
  summarize(Freq = sum(Shape_Open)) %>% 
  mutate(Prop = Freq / sum(Freq),
         Prop = round(Prop, 2),
         Percentage = str_c(Prop * 100, '%'))
## `summarise()` ungrouping output (override with `.groups` argument)
## # A tibble: 2 x 4
##   Condition  Freq  Prop Percentage
##   <chr>     <dbl> <dbl> <chr>     
## 1 Friend     1017  0.56 56%       
## 2 Superior    785  0.44 44%

Overall number of gestures per condition per language:

# Tally by language and condition:

# summarize() peels off only the last grouping level, so the result is still
# grouped by Language (hence the "regrouping output by 'Language'" message).
freq_count <- dyads %>% group_by(Language, Condition) %>% 
  summarize(Freq = sum(Shape_Open))
## `summarise()` regrouping output by 'Language' (override with `.groups` argument)
# Take sums by language for calculating proportions, and calculate them:

# Per-language totals, joined back onto the per-condition tallies so every row
# carries its language's Total; then within-language proportions and labels.
# The join key is now spelled out (by = 'Language') instead of relying on
# right_join()'s implicit common-column matching — same result, reproducible
# and silent about key choice.
freq_count <- freq_count %>% group_by(Language) %>% 
  summarize(Total = sum(Freq)) %>%
  right_join(freq_count, by = 'Language') %>% 
  mutate(Prop = Freq / Total,
         Prop = round(Prop, 2),
         Percentage = str_c(Prop * 100, '%'))
## `summarise()` ungrouping output (override with `.groups` argument)
## Joining, by = "Language"
# Check:

freq_count
## # A tibble: 4 x 6
##   Language Total Condition  Freq  Prop Percentage
##   <chr>    <dbl> <chr>     <dbl> <dbl> <chr>     
## 1 Catalan   1056 Friend      603 0.570 57%       
## 2 Catalan   1056 Superior    453 0.43  43%       
## 3 Korean     746 Friend      414 0.55  55%       
## 4 Korean     746 Superior    332 0.45  45%

Check frequency of open-handed gestures per speaker:

# Per-speaker rate of open-handed gestures in each condition: widen so each
# speaker has Friend/Superior columns, turn counts into proportions of that
# speaker's total gestures, then take the Superior - Friend difference
# (negative = fewer open-handed gestures with a superior).
open <- dyads %>% select(Language, ID, Condition, Shape_Open, Total_Freq) %>% 
  pivot_wider(names_from = Condition, values_from = c(Shape_Open, Total_Freq)) %>% 
  mutate(Shape_Open_Superior = Shape_Open_Superior / Total_Freq_Superior, 
         Shape_Open_Friend = Shape_Open_Friend / Total_Freq_Friend,
         PoliteDiff = Shape_Open_Superior - Shape_Open_Friend)

# Check:

open %>% print(n = Inf)
## # A tibble: 27 x 7
##    Language ID    Shape_Open_Frie… Shape_Open_Supe… Total_Freq_Frie…
##    <chr>    <chr>            <dbl>            <dbl>            <dbl>
##  1 Catalan  Cata…            0.656            0.769               61
##  2 Catalan  Cata…            0.934            0.781               61
##  3 Catalan  Cata…            1                0.843               58
##  4 Catalan  Cata…            0.713            0.390              101
##  5 Catalan  Cata…            0.804            0.608               56
##  6 Catalan  Cata…            0.9              0.465               40
##  7 Catalan  Cata…            0.903            0.5                 31
##  8 Catalan  Cata…            0.630            0.388               81
##  9 Catalan  Cata…            0.719            0.633               32
## 10 Catalan  Cata…            1                0.812               39
## 11 Catalan  Cata…            0.829            0.633               35
## 12 Catalan  Cata…            0.784            0.562               88
## 13 Catalan  Cata…            0.695            0.723               59
## 14 Catalan  Cata…            0.682            0.462               22
## 15 Korean   Kore…            0.582            0.539               91
## 16 Korean   Kore…            0.689            0.639               61
## 17 Korean   Kore…            0.226            0.438               31
## 18 Korean   Kore…            0.556            0.643               63
## 19 Korean   Kore…            0.545            0.6                 22
## 20 Korean   Kore…            0.565            0.432               62
## 21 Korean   Kore…            0.438            0.857               89
## 22 Korean   Kore…            0.667            0.8                 30
## 23 Korean   Kore…            0.333            0.182               33
## 24 Korean   Kore…            0.577            0.538               26
## 25 Korean   Kore…            0.905            0.839               63
## 26 Korean   Kore…            0.588            0.852               51
## 27 Korean   Kore…            0.866            0.939               67
## # … with 2 more variables: Total_Freq_Superior <dbl>, PoliteDiff <dbl>

Fairly consistent picture where open-handed gestures are used less in the superior context (clearly so for Catalan, more mixed for Korean). Compute for how many speakers this is:

# Classify each speaker by the direction of the Superior - Friend difference.
# A single case_when() on the numeric PoliteDiff replaces the old ifelse()
# chain, which coerced PoliteDiff to character and then compared that string
# vector against 0 — a comparison that only worked via locale-dependent
# string collation. NA differences still yield NA (no case matches).
open <- open %>%
  mutate(Category = case_when(PoliteDiff > 0 ~ 'more polite',
                              PoliteDiff < 0 ~ 'less polite',
                              PoliteDiff == 0 ~ 'same'))

# Count:

open %>% count(Language, Category)
## # A tibble: 4 x 3
##   Language Category        n
##   <chr>    <chr>       <int>
## 1 Catalan  less polite    12
## 2 Catalan  more polite     2
## 3 Korean   less polite     6
## 4 Korean   more polite     7

Create a model for open-handed gestures:

# Binomial mixed model: open-handed gestures out of all gestures per speaker
# and condition, with (sum-coded) language, condition, and their interaction,
# plus by-speaker random intercepts and condition slopes.
# my_priors / my_controls / my_iter / my_warmup are assumed defined earlier in
# the file — not visible in this chunk.
open_mdl <- brm(Shape_Open | trials(Total_Freq) ~ Language_c * Condition_c +
                  (1 + Condition_c|ID),
                data = dyads, family = binomial,
                prior = my_priors, control = my_controls,
                seed = 42,
                init = 0, chains = 4, iter = my_iter, warmup = my_warmup)
## Running /Library/Frameworks/R.framework/Resources/bin/R CMD SHLIB foo.c
## clang -mmacosx-version-min=10.13 -I"/Library/Frameworks/R.framework/Resources/include" -DNDEBUG   -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/Rcpp/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/unsupported"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/BH/include" -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/src/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppParallel/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/rstan/include" -DEIGEN_NO_DEBUG  -DBOOST_DISABLE_ASSERTS  -DBOOST_PENDING_INTEGER_LOG2_HPP  -DSTAN_THREADS  -DBOOST_NO_AUTO_PTR  -include '/Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp'  -D_REENTRANT -DRCPP_PARALLEL_USE_TBB=1   -I/usr/local/include   -fPIC  -Wall -g -O2  -c foo.c -o foo.o
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Dense:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Core:88:
## /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:613:1: error: unknown type name 'namespace'
## namespace Eigen {
## ^
## /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:613:16: error: expected ';' after top level declarator
## namespace Eigen {
##                ^
##                ;
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Dense:1:
## /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Core:96:10: fatal error: 'complex' file not found
## #include <complex>
##          ^~~~~~~~~
## 3 errors generated.
## make: *** [foo.o] Error 1

Summarize model:

summary(open_mdl)
##  Family: binomial 
##   Links: mu = logit 
## Formula: Shape_Open | trials(Total_Freq) ~ Language_c * Condition_c + (1 + Condition_c | ID) 
##    Data: dyads (Number of observations: 54) 
## Samples: 4 chains, each with iter = 6000; warmup = 4000; thin = 1;
##          total post-warmup samples = 8000
## 
## Group-Level Effects: 
## ~ID (Number of levels: 27) 
##                             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept)                   0.85      0.15     0.61     1.18 1.00     2004
## sd(Condition_c1)                0.76      0.17     0.47     1.16 1.00     3016
## cor(Intercept,Condition_c1)    -0.17      0.26    -0.64     0.37 1.00     3386
##                             Tail_ESS
## sd(Intercept)                   3363
## sd(Condition_c1)                4708
## cor(Intercept,Condition_c1)     5094
## 
## Population-Level Effects: 
##                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept                    0.82      0.17     0.48     1.17 1.00     1447
## Language_c1                 -0.48      0.35    -1.19     0.19 1.00     1446
## Condition_c1                -0.38      0.19    -0.76    -0.02 1.00     2929
## Language_c1:Condition_c1     1.39      0.36     0.67     2.10 1.00     2703
##                          Tail_ESS
## Intercept                    2763
## Language_c1                  2597
## Condition_c1                 3962
## Language_c1:Condition_c1     4193
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).

Get the posterior probability of the condition and interaction effects for the open-handed gesture model:

# Posterior draws from the open-handed gesture model (overwrites `posts`).
posts <- posterior_samples(open_mdl)

Posterior probability of the condition effect being above 0:

# Proportion of posterior draws in which the condition effect is positive.
mean(posts$b_Condition_c1 > 0)
## [1] 0.0205

Posterior probability of the interaction effect being above 0:

# Proportion of posterior draws in which the interaction effect is positive.
mean(posts$`b_Language_c1:Condition_c1` > 0)
## [1] 1

Posterior predictive checks to see whether our model could’ve predicted the data:

pp_check(open_mdl, nsamples = 100)

Analysis: path, manner, ground

What is the overall number of path, manner, and ground gestures?

# Grand totals of path-, manner-, and ground-encoding gestures across all
# speakers and conditions:
dyads %>% summarize(Path = sum(Path),
                    Manner = sum(Manner),
                    Ground = sum(Ground))
## # A tibble: 1 x 3
##    Path Manner Ground
##   <dbl>  <dbl>  <dbl>
## 1   425    342    119

Calculate rate of path/manner/ground encoding per total gestures:

# Per-row encoding rates: each count divided by that speaker/condition's
# total number of gestures.
dyads <- mutate(dyads,
                Path_p = Path / Total_Freq,
                Manner_p = Manner / Total_Freq,
                Ground_p = Ground / Total_Freq)

Calculate path change by speaker and whether it changes based on superiority:

# One row per speaker with Friend and Superior path rates side by side;
# PoliteDiff > 0 means more path encoding with a superior.
# (Overwrites the `path` tibble read in at the top of the file.)
path <- dyads %>% select(Language, ID, Condition, Path_p) %>% 
  pivot_wider(names_from = Condition, values_from = Path_p) %>% 
  mutate(PoliteDiff = Superior - Friend)

# Check:

path %>% print(n = Inf)
## # A tibble: 27 x 5
##    Language ID         Friend Superior PoliteDiff
##    <chr>    <chr>       <dbl>    <dbl>      <dbl>
##  1 Catalan  Catalan_1  0.148    0.128    -0.0193 
##  2 Catalan  Catalan_11 0.115    0.110    -0.00517
##  3 Catalan  Catalan_12 0.103    0.157     0.0534 
##  4 Catalan  Catalan_13 0.0990   0.0976   -0.00145
##  5 Catalan  Catalan_14 0.0893   0.149     0.0594 
##  6 Catalan  Catalan_16 0.225    0.209    -0.0157 
##  7 Catalan  Catalan_2  0.258    0.075    -0.183  
##  8 Catalan  Catalan_3  0.0988   0.143     0.0441 
##  9 Catalan  Catalan_4  0.25     0.143    -0.107  
## 10 Catalan  Catalan_5  0.256    0.188    -0.0689 
## 11 Catalan  Catalan_6  0.257    0.267     0.00952
## 12 Catalan  Catalan_7  0.148    0.123    -0.0244 
## 13 Catalan  Catalan_8  0.153    0.128    -0.0249 
## 14 Catalan  Catalan_9  0.318    0.385     0.0664 
## 15 Korean   Korean_1   0.143    0.158     0.0150 
## 16 Korean   Korean_10  0.164    0.148    -0.0164 
## 17 Korean   Korean_11  0.355    0.312    -0.0423 
## 18 Korean   Korean_12  0.0794   0.119     0.0397 
## 19 Korean   Korean_13  0.227    0.1      -0.127  
## 20 Korean   Korean_2   0.129    0.189     0.0602 
## 21 Korean   Korean_3   0.135    0.224     0.0897 
## 22 Korean   Korean_4   0.2      0.167    -0.0333 
## 23 Korean   Korean_5   0.212    0.455     0.242  
## 24 Korean   Korean_6   0.423    0.462     0.0385 
## 25 Korean   Korean_7   0.111    0.145     0.0341 
## 26 Korean   Korean_8   0.196    0.0370   -0.159  
## 27 Korean   Korean_9   0.179    0.184     0.00457
# Speaker changes overall:

# Classify each speaker by the direction of the Superior - Friend difference.
# case_when() keeps the comparison on the numeric PoliteDiff; the previous
# ifelse() chain compared a character-coerced vector against 0, relying on
# locale-dependent string collation. NA differences still yield NA.
path <- path %>%
  mutate(Category = case_when(PoliteDiff > 0 ~ 'more polite',
                              PoliteDiff < 0 ~ 'less polite',
                              PoliteDiff == 0 ~ 'same'))

# Count:

path %>% count(Language, Category)
## # A tibble: 4 x 3
##   Language Category        n
##   <chr>    <chr>       <int>
## 1 Catalan  less polite     9
## 2 Catalan  more polite     5
## 3 Korean   less polite     5
## 4 Korean   more polite     8

Same for manner:

# One row per speaker with Friend and Superior manner rates side by side;
# PoliteDiff > 0 means more manner encoding with a superior.
manner <- dyads %>% select(Language, ID, Condition, Manner_p) %>% 
  pivot_wider(names_from = Condition, values_from = Manner_p) %>% 
  mutate(PoliteDiff = Superior - Friend)

# Check:

manner %>% print(n = Inf)
## # A tibble: 27 x 5
##    Language ID         Friend Superior PoliteDiff
##    <chr>    <chr>       <dbl>    <dbl>      <dbl>
##  1 Catalan  Catalan_1  0.148    0.0897  -0.0578  
##  2 Catalan  Catalan_11 0.148    0.0685  -0.0790  
##  3 Catalan  Catalan_12 0.155    0.118   -0.0375  
##  4 Catalan  Catalan_13 0.109    0.110    0.000845
##  5 Catalan  Catalan_14 0.0893   0.0676  -0.0217  
##  6 Catalan  Catalan_16 0.125    0.0698  -0.0552  
##  7 Catalan  Catalan_2  0.194    0.1     -0.0935  
##  8 Catalan  Catalan_3  0.111    0.0408  -0.0703  
##  9 Catalan  Catalan_4  0.188    0.0408  -0.147   
## 10 Catalan  Catalan_5  0.256    0.188   -0.0689  
## 11 Catalan  Catalan_6  0.171    0.167   -0.00476 
## 12 Catalan  Catalan_7  0.0909   0.0822  -0.00872 
## 13 Catalan  Catalan_8  0.153    0.128   -0.0249  
## 14 Catalan  Catalan_9  0.273    0       -0.273   
## 15 Korean   Korean_1   0.132    0.132   -0.000289
## 16 Korean   Korean_10  0.115    0.0984  -0.0164  
## 17 Korean   Korean_11  0.129    0.0625  -0.0665  
## 18 Korean   Korean_12  0.111    0.167    0.0556  
## 19 Korean   Korean_13  0.227    0.2     -0.0273  
## 20 Korean   Korean_2   0.161    0.0811  -0.0802  
## 21 Korean   Korean_3   0.135    0.184    0.0488  
## 22 Korean   Korean_4   0.2      0.133   -0.0667  
## 23 Korean   Korean_5   0.152    0.0909  -0.0606  
## 24 Korean   Korean_6   0.346    0.308   -0.0385  
## 25 Korean   Korean_7   0.111    0.0806  -0.0305  
## 26 Korean   Korean_8   0.176    0.185    0.00871 
## 27 Korean   Korean_9   0.149    0.163    0.0140
# Speaker changes overall:

# Classify each speaker by the direction of the Superior - Friend difference.
# case_when() keeps the comparison on the numeric PoliteDiff; the previous
# ifelse() chain compared a character-coerced vector against 0, relying on
# locale-dependent string collation. NA differences still yield NA.
manner <- manner %>%
  mutate(Category = case_when(PoliteDiff > 0 ~ 'more polite',
                              PoliteDiff < 0 ~ 'less polite',
                              PoliteDiff == 0 ~ 'same'))

# Count:

manner %>% count(Language, Category)
## # A tibble: 4 x 3
##   Language Category        n
##   <chr>    <chr>       <int>
## 1 Catalan  less polite    13
## 2 Catalan  more polite     1
## 3 Korean   less polite     9
## 4 Korean   more polite     4

Same for ground:

# One row per speaker with Friend and Superior ground rates side by side;
# PoliteDiff > 0 means more ground encoding with a superior.
ground <- dyads %>% select(Language, ID, Condition, Ground_p) %>% 
  pivot_wider(names_from = Condition, values_from = Ground_p) %>% 
  mutate(PoliteDiff = Superior - Friend)

# Check:

ground %>% print(n = Inf)
## # A tibble: 27 x 5
##    Language ID         Friend Superior PoliteDiff
##    <chr>    <chr>       <dbl>    <dbl>      <dbl>
##  1 Catalan  Catalan_1  0.0656   0.0769    0.0113 
##  2 Catalan  Catalan_11 0.0984   0.0685   -0.0299 
##  3 Catalan  Catalan_12 0.0345   0.0784    0.0439 
##  4 Catalan  Catalan_13 0.0297   0.0244   -0.00531
##  5 Catalan  Catalan_14 0.0179   0.0270    0.00917
##  6 Catalan  Catalan_16 0        0         0      
##  7 Catalan  Catalan_2  0.0968   0.05     -0.0468 
##  8 Catalan  Catalan_3  0.0247   0        -0.0247 
##  9 Catalan  Catalan_4  0.0625   0.0204   -0.0421 
## 10 Catalan  Catalan_5  0.0513   0        -0.0513 
## 11 Catalan  Catalan_6  0.114    0.0667   -0.0476 
## 12 Catalan  Catalan_7  0.0455   0.0822    0.0367 
## 13 Catalan  Catalan_8  0.102    0        -0.102  
## 14 Catalan  Catalan_9  0.0455   0.0769    0.0315 
## 15 Korean   Korean_1   0.0330   0.0263   -0.00665
## 16 Korean   Korean_10  0.0328   0.0164   -0.0164 
## 17 Korean   Korean_11  0.0323   0        -0.0323 
## 18 Korean   Korean_12  0.0476   0.0476    0      
## 19 Korean   Korean_13  0.0909   0        -0.0909 
## 20 Korean   Korean_2   0        0.0270    0.0270 
## 21 Korean   Korean_3   0.0449   0.0408   -0.00413
## 22 Korean   Korean_4   0.0667   0        -0.0667 
## 23 Korean   Korean_5   0        0         0      
## 24 Korean   Korean_6   0.115    0.0769   -0.0385 
## 25 Korean   Korean_7   0.0635   0.0645    0.00102
## 26 Korean   Korean_8   0.0588   0.0370   -0.0218 
## 27 Korean   Korean_9   0.0746   0.0408   -0.0338
# Speaker changes overall:

# Classify each speaker by the direction of the Superior - Friend difference.
# case_when() keeps the comparison on the numeric PoliteDiff; the previous
# ifelse() chain compared a character-coerced vector against 0, relying on
# locale-dependent string collation. NA differences still yield NA.
ground <- ground %>%
  mutate(Category = case_when(PoliteDiff > 0 ~ 'more polite',
                              PoliteDiff < 0 ~ 'less polite',
                              PoliteDiff == 0 ~ 'same'))

# Count:

ground %>% count(Language, Category)
## # A tibble: 6 x 3
##   Language Category        n
##   <chr>    <chr>       <int>
## 1 Catalan  less polite     8
## 2 Catalan  more polite     5
## 3 Catalan  same            1
## 4 Korean   less polite     9
## 5 Korean   more polite     2
## 6 Korean   same            2

Create models:

# Path:

# Path gestures out of total gestures; same binomial random-slope
# specification as the other models (my_priors etc. assumed defined earlier).
path_mdl <- brm(Path | trials(Total_Freq) ~ Language_c * Condition_c +
                  (1 + Condition_c|ID),
                data = dyads, family = binomial,
                prior = my_priors, control = my_controls,
                seed = 42,
                init = 0, chains = 4, iter = my_iter, warmup = my_warmup)

# Manner:

# Manner gestures out of total gestures; same binomial random-slope
# specification as the other models (my_priors etc. assumed defined earlier).
manner_mdl <- brm(Manner | trials(Total_Freq) ~ Language_c * Condition_c +
                    (1 + Condition_c|ID),
                  data = dyads, family = binomial,
                  prior = my_priors, control = my_controls,
                  seed = 42,
                  init = 0, chains = 4, iter = my_iter, warmup = my_warmup)

# Ground:

# Ground gestures out of total gestures; same binomial random-slope
# specification as the other models (my_priors etc. assumed defined earlier).
ground_mdl <- brm(Ground | trials(Total_Freq) ~ Language_c * Condition_c +
                    (1 + Condition_c|ID),
                  data = dyads, family = binomial,
                  prior = my_priors, control = my_controls,
                  seed = 42,
                  init = 0, chains = 4, iter = my_iter, warmup = my_warmup)

Summarize models:

summary(path_mdl)
##  Family: binomial 
##   Links: mu = logit 
## Formula: Path | trials(Total_Freq) ~ Language_c * Condition_c + (1 + Condition_c | ID) 
##    Data: dyads (Number of observations: 54) 
## Samples: 4 chains, each with iter = 6000; warmup = 4000; thin = 1;
##          total post-warmup samples = 8000
## 
## Group-Level Effects: 
## ~ID (Number of levels: 27) 
##                             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept)                   0.38      0.10     0.20     0.61 1.00     2517
## sd(Condition_c1)                0.15      0.11     0.01     0.42 1.00     3764
## cor(Intercept,Condition_c1)    -0.18      0.55    -0.97     0.90 1.00     6818
##                             Tail_ESS
## sd(Intercept)                   4210
## sd(Condition_c1)                3298
## cor(Intercept,Condition_c1)     4634
## 
## Population-Level Effects: 
##                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept                   -1.60      0.09    -1.79    -1.41 1.00     3012
## Language_c1                  0.18      0.19    -0.19     0.56 1.00     3544
## Condition_c1                -0.01      0.12    -0.24     0.21 1.00     8798
## Language_c1:Condition_c1     0.16      0.23    -0.29     0.61 1.00     8383
##                          Tail_ESS
## Intercept                    4080
## Language_c1                  4795
## Condition_c1                 6010
## Language_c1:Condition_c1     4491
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
summary(manner_mdl)
##  Family: binomial 
##   Links: mu = logit 
## Formula: Manner | trials(Total_Freq) ~ Language_c * Condition_c + (1 + Condition_c | ID) 
##    Data: dyads (Number of observations: 54) 
## Samples: 4 chains, each with iter = 6000; warmup = 4000; thin = 1;
##          total post-warmup samples = 8000
## 
## Group-Level Effects: 
## ~ID (Number of levels: 27) 
##                             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept)                   0.15      0.10     0.01     0.37 1.00     2129
## sd(Condition_c1)                0.14      0.11     0.01     0.42 1.00     4603
## cor(Intercept,Condition_c1)     0.04      0.58    -0.94     0.96 1.00     7802
##                             Tail_ESS
## sd(Intercept)                   3138
## sd(Condition_c1)                3332
## cor(Intercept,Condition_c1)     5173
## 
## Population-Level Effects: 
##                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept                   -1.93      0.07    -2.06    -1.79 1.00     6838
## Language_c1                  0.26      0.14    -0.01     0.54 1.00     8097
## Condition_c1                -0.32      0.13    -0.57    -0.07 1.00     9140
## Language_c1:Condition_c1     0.40      0.25    -0.09     0.88 1.00    10076
##                          Tail_ESS
## Intercept                    5226
## Language_c1                  6157
## Condition_c1                 6011
## Language_c1:Condition_c1     6247
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
summary(ground_mdl)
##  Family: binomial 
##   Links: mu = logit 
## Formula: Ground | trials(Total_Freq) ~ Language_c * Condition_c + (1 + Condition_c | ID) 
##    Data: dyads (Number of observations: 54) 
## Samples: 4 chains, each with iter = 6000; warmup = 4000; thin = 1;
##          total post-warmup samples = 8000
## 
## Group-Level Effects: 
## ~ID (Number of levels: 27) 
##                             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept)                   0.33      0.18     0.02     0.72 1.00     1772
## sd(Condition_c1)                0.28      0.22     0.01     0.81 1.00     3852
## cor(Intercept,Condition_c1)     0.09      0.57    -0.93     0.96 1.00     7624
##                             Tail_ESS
## sd(Intercept)                   2362
## sd(Condition_c1)                3996
## cor(Intercept,Condition_c1)     5740
## 
## Population-Level Effects: 
##                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept                   -3.17      0.13    -3.44    -2.93 1.00     6770
## Language_c1                 -0.19      0.25    -0.68     0.31 1.00     6504
## Condition_c1                -0.32      0.22    -0.76     0.09 1.00     8442
## Language_c1:Condition_c1    -0.11      0.42    -0.96     0.72 1.00     9392
##                          Tail_ESS
## Intercept                    5764
## Language_c1                  5747
## Condition_c1                 5179
## Language_c1:Condition_c1     6091
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).

Get the posterior probabilities of each:

# Posterior draws from each of the three models (one data frame per model):
path_posts <- posterior_samples(path_mdl)
manner_posts <- posterior_samples(manner_mdl)
ground_posts <- posterior_samples(ground_mdl)

Posterior probability of the condition effect being above 0:

# Share of posterior draws with a positive condition effect, per model:
sum(path_posts$b_Condition_c1 > 0) / nrow(path_posts)
## [1] 0.45575
sum(manner_posts$b_Condition_c1 > 0) / nrow(manner_posts)
## [1] 0.00725
sum(ground_posts$b_Condition_c1 > 0) / nrow(ground_posts)
## [1] 0.066375

Posterior probability of the interaction effect being above 0:

# Share of posterior draws with a positive interaction effect, per model:
sum(path_posts$`b_Language_c1:Condition_c1` > 0) / nrow(path_posts)
## [1] 0.761875
sum(manner_posts$`b_Language_c1:Condition_c1` > 0) / nrow(manner_posts)
## [1] 0.943625
sum(ground_posts$`b_Language_c1:Condition_c1` > 0) / nrow(ground_posts)
## [1] 0.39475

Check the posterior predictive probabilities for each:

pp_check(path_mdl, nsamples = 100)

pp_check(manner_mdl, nsamples = 100)

pp_check(ground_mdl, nsamples = 100)

Analysis: Viewpoint

Overall number of Character, Observer, and Dual:

# Overall counts per viewpoint type. Fixed copy-paste bug: Dual previously
# summed Observer a second time (the printed output below, with
# Dual == Observer == 300, reflects the old bug and is stale).
dyads %>% summarize(Character = sum(Character),
                    Observer = sum(Observer),
                    Dual = sum(Dual))
## # A tibble: 1 x 3
##   Character Observer  Dual
##       <dbl>    <dbl> <dbl>
## 1       199      300   300

Calculate difference in percentage between character and observer viewpoint:

# Proportion of character vs. observer viewpoint per row, and their difference
# (positive = more character viewpoint).
# NOTE(review): divides by Character + Observer, which yields NaN if a speaker
# produced neither — confirm the data rules this out.
dyads <- mutate(dyads,
                Character_p = Character / (Character + Observer),
                Observer_p = Observer / (Character + Observer),
                Viewpoint_diff = Character_p - Observer_p)

Do this for speakers to see if this changes for superior versus friend:

# One row per speaker with Friend and Superior viewpoint differences side by
# side; PoliteDiff < 0 means relatively less character viewpoint with a
# superior.
viewpoint <- dyads %>% select(Language, ID, Condition, Viewpoint_diff) %>% 
  pivot_wider(names_from = Condition, values_from = Viewpoint_diff) %>% 
  mutate(PoliteDiff = Superior - Friend)

# Check:

viewpoint %>% print(n = Inf)
## # A tibble: 27 x 5
##    Language ID          Friend Superior PoliteDiff
##    <chr>    <chr>        <dbl>    <dbl>      <dbl>
##  1 Catalan  Catalan_1   0.273    0.0909    -0.182 
##  2 Catalan  Catalan_11 -0.6     -0.5        0.1   
##  3 Catalan  Catalan_12  0       -0.273     -0.273 
##  4 Catalan  Catalan_13  0.273    0         -0.273 
##  5 Catalan  Catalan_14  0.143   -0.286     -0.429 
##  6 Catalan  Catalan_16 -0.111   -0.6       -0.489 
##  7 Catalan  Catalan_2  -0.111    0.333      0.444 
##  8 Catalan  Catalan_3   0.200   -0.714     -0.914 
##  9 Catalan  Catalan_4  -0.333   -1         -0.667 
## 10 Catalan  Catalan_5  -0.167   -0.5       -0.333 
## 11 Catalan  Catalan_6  -0.818   -0.6        0.218 
## 12 Catalan  Catalan_7  -0.231   -0.455     -0.224 
## 13 Catalan  Catalan_8  -0.40    -0.40       0     
## 14 Catalan  Catalan_9  -0.5     -1         -0.5   
## 15 Korean   Korean_1    0.143   -0.385     -0.527 
## 16 Korean   Korean_10  -0.167   -0.40      -0.233 
## 17 Korean   Korean_11  -0.333   -0.6       -0.267 
## 18 Korean   Korean_12   0.5      0         -0.5   
## 19 Korean   Korean_13   0.6      1          0.400 
## 20 Korean   Korean_2    0.231   -0.667     -0.897 
## 21 Korean   Korean_3   -0.286   -0.0769     0.209 
## 22 Korean   Korean_4    0.333    0.200     -0.133 
## 23 Korean   Korean_5   -0.25    -1         -0.75  
## 24 Korean   Korean_6    0.0909   0         -0.0909
## 25 Korean   Korean_7    0.111   -0.40      -0.511 
## 26 Korean   Korean_8   -0.385   -0.200      0.185 
## 27 Korean   Korean_9    0.231   -0.5       -0.731

Very consistent picture where character viewpoint is used relatively less in the superior context. Compute for how many speakers this is:

# Classify each speaker by the direction of the Superior - Friend difference.
# case_when() keeps the comparison on the numeric PoliteDiff; the previous
# ifelse() chain compared a character-coerced vector against 0, relying on
# locale-dependent string collation. NA differences still yield NA.
viewpoint <- viewpoint %>%
  mutate(Category = case_when(PoliteDiff > 0 ~ 'more polite',
                              PoliteDiff < 0 ~ 'less polite',
                              PoliteDiff == 0 ~ 'same'))

# Count:

viewpoint %>% count(Language, Category)
## # A tibble: 5 x 3
##   Language Category        n
##   <chr>    <chr>       <int>
## 1 Catalan  less polite    10
## 2 Catalan  more polite     3
## 3 Catalan  same            1
## 4 Korean   less polite    10
## 5 Korean   more polite     3

Create a model for viewpoint. We will represent this as a binomial problem since this is a choice of character viewpoint or observer viewpoint. For this we need a “trial” variable (N of the binomial distribution), which will be the sum of observer and character gestures:

# Binomial denominator: total viewpoint-marked gestures per row.
dyads <- mutate(dyads,
                Trial = Observer + Character)

Fit the model:

# Character vs. observer viewpoint as a binomial choice out of Trial viewpoint
# gestures; same specification as the other models (my_priors etc. assumed
# defined earlier).
viewpoint_mdl <- brm(Character | trials(Trial) ~ Language_c * Condition_c +
                       (1 + Condition_c|ID),
                     data = dyads, family = binomial,
                     prior = my_priors, control = my_controls,
                     seed = 42,
                     init = 0, chains = 4, iter = my_iter, warmup = my_warmup)

Summarize model:

summary(viewpoint_mdl)
##  Family: binomial 
##   Links: mu = logit 
## Formula: Character | trials(Trial) ~ Language_c * Condition_c + (1 + Condition_c | ID) 
##    Data: dyads (Number of observations: 54) 
## Samples: 4 chains, each with iter = 6000; warmup = 4000; thin = 1;
##          total post-warmup samples = 8000
## 
## Group-Level Effects: 
## ~ID (Number of levels: 27) 
##                             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept)                   0.36      0.18     0.03     0.74 1.00     1984
## sd(Condition_c1)                0.24      0.18     0.01     0.68 1.00     4091
## cor(Intercept,Condition_c1)     0.01      0.57    -0.94     0.95 1.00     6991
##                             Tail_ESS
## sd(Intercept)                   2788
## sd(Condition_c1)                3216
## cor(Intercept,Condition_c1)     4801
## 
## Population-Level Effects: 
##                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept                   -0.46      0.13    -0.71    -0.21 1.00     5380
## Language_c1                  0.36      0.25    -0.12     0.87 1.00     6108
## Condition_c1                -0.60      0.20    -1.00    -0.20 1.00     8528
## Language_c1:Condition_c1    -0.12      0.40    -0.91     0.66 1.00     9066
##                          Tail_ESS
## Intercept                    5406
## Language_c1                  5077
## Condition_c1                 5661
## Language_c1:Condition_c1     5796
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).

Get the posterior probability of the condition and interaction effects for the viewpoint model:

# Posterior draws from the viewpoint model (overwrites `posts`).
posts <- posterior_samples(viewpoint_mdl)

Posterior probability of the condition effect being above 0:

# Proportion of posterior draws in which the condition effect is positive.
mean(posts$b_Condition_c1 > 0)
## [1] 0.001125

Posterior probability of the interaction effect being above 0:

# Proportion of posterior draws in which the interaction effect is positive.
mean(posts$`b_Language_c1:Condition_c1` > 0)
## [1] 0.390875

Posterior predictive checks to see whether our model could’ve predicted the data:

pp_check(viewpoint_mdl, nsamples = 100)